Azure OpenAI API (stable:2023-05-15)

2025/03/21 • 2 updated methods

GetChatCompletions (updated)
Description Gets chat completions for the provided chat messages. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data.
Reference Link

⚶ Changes

{
  "#id": "GetChatCompletions",
  "$responses": {
    "200": {
      "$properties": [
        {
          "#name": "usage",
          "Required": {
            "new": false,
            "old": true
          }
        }
      ]
    }
  }
}
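
With `usage` no longer required on the 200 response, clients that report token counts should read it defensively. A minimal sketch in Python, assuming `response_json` is the parsed 200 body:

# `usage` may now be absent from a successful response, so read it defensively.
def token_counts(response_json: dict) -> tuple[int, int, int]:
    usage = response_json.get("usage") or {}
    return (
        usage.get("prompt_tokens", 0),
        usage.get("completion_tokens", 0),
        usage.get("total_tokens", 0),
    )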

⚼ Request

POST: /deployments/{deploymentId}/chat/completions
{
  api-version: string,
  deploymentId: string,
  body: {
    messages: [
      {
        role: enum,
      },
    ],
    max_tokens: integer,
    temperature: number,
    top_p: number,
    logit_bias: object,
    user: string,
    n: integer,
    stop: [
      string,
    ],
    presence_penalty: number,
    frequency_penalty: number,
    stream: boolean,
    model: string,
  },
}
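
A minimal call sketch in Python using `requests`. The resource endpoint, deployment name, and key are placeholders; the assumption here is that the relative path above is rooted at the resource's `/openai` base and authenticated with the `api-key` header, and that each message carries a `content` string alongside `role`:

import requests

# Placeholders: substitute your own resource endpoint, deployment name, and key.
ENDPOINT = "https://YOUR-RESOURCE.openai.azure.com"
DEPLOYMENT = "YOUR-DEPLOYMENT"

resp = requests.post(
    f"{ENDPOINT}/openai/deployments/{DEPLOYMENT}/chat/completions",
    params={"api-version": "2023-05-15"},
    headers={"api-key": "YOUR-API-KEY", "Content-Type": "application/json"},
    json={
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "Say hello."},
        ],
        "max_tokens": 64,
        "temperature": 0.7,
    },
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])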

⚐ Response (200)

{
  id: string,
  created: integer,
  choices: [
    {
      message: {
        role: enum,
        content: string,
      },
      index: integer,
      finish_reason: enum,
      delta: {
        role: enum,
        content: string,
      },
    },
  ],
  model: string,
  usage: {
    completion_tokens: integer,
    prompt_tokens: integer,
    total_tokens: integer,
  },
}
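
`message` is populated on regular responses, while `delta` carries incremental content on streamed chunks (`stream: true`). A small sketch that reads either form from a parsed choice:

# Works for both non-streamed choices (message) and streamed chunks (delta).
def choice_text(choice: dict) -> str:
    part = choice.get("message") or choice.get("delta") or {}
    return part.get("content") or ""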

⚐ Response (default)

{
  $headers: {
    x-ms-error-code: string,
  },
  $schema: {
    error: {
      code: string,
      message: string,
      target: string,
      details: [
        string,
      ],
      innererror: {
        code: string,
        innererror: string,
      },
    },
  },
}
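
Failed calls return the error envelope above together with an `x-ms-error-code` header. A defensive read, assuming `resp` is a `requests.Response`:

import requests

# Surface the service error code and message from a non-200 response.
def describe_error(resp: requests.Response) -> str:
    code = resp.headers.get("x-ms-error-code", "")
    try:
        err = resp.json().get("error", {})
    except ValueError:  # body was not JSON
        err = {}
    return f"{code or err.get('code', 'unknown')}: {err.get('message', resp.text[:200])}"
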
GetCompletions (updated)
Description Gets completions for the provided input prompts. Completions support a wide variety of tasks and generate text that continues from or "completes" provided prompt data.
Reference Link

⚶ Changes

{
  "#id": "GetCompletions",
  "$responses": {
    "200": {
      "$properties": [
        {
          "#name": "usage",
          "Required": {
            "new": false,
            "old": true
          }
        }
      ]
    }
  }
}

⚼ Request

POST: /deployments/{deploymentId}/completions
{
  api-version: string,
  deploymentId: string,
  body: {
    prompt: [
      string,
    ],
    max_tokens: integer,
    temperature: number,
    top_p: number,
    logit_bias: object,
    user: string,
    n: integer,
    logprobs: integer,
    suffix: string,
    echo: boolean,
    stop: [
      string,
    ],
    presence_penalty: number,
    frequency_penalty: number,
    best_of: integer,
    stream: boolean,
    model: string,
  },
}
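
The same call pattern as the chat example applies here; `prompt` is an array of strings, and `n` and `stop` behave as listed in the schema above. A sketch with placeholder endpoint, deployment, and key:

import requests

resp = requests.post(
    "https://YOUR-RESOURCE.openai.azure.com/openai/deployments/YOUR-DEPLOYMENT/completions",
    params={"api-version": "2023-05-15"},
    headers={"api-key": "YOUR-API-KEY", "Content-Type": "application/json"},
    json={
        "prompt": ["Write a one-line tagline for a coffee shop."],
        "max_tokens": 32,
        "n": 2,
        "stop": ["\n"],
    },
)
resp.raise_for_status()
body = resp.json()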

⚐ Response (200)

{
  id: string,
  created: integer,
  choices: [
    {
      text: string,
      index: integer,
      logprobs: object,
      finish_reason: enum,
    },
  ],
  usage: {
    completion_tokens: integer,
    prompt_tokens: integer,
    total_tokens: integer,
  },
}
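
Each generated sample arrives as a separate entry in `choices`, ordered by `index`, and `usage` is optional here as well. A sketch for reading the parsed 200 body:

# `body` is the parsed 200 response; guard the optional `usage` block.
def show_completions(body: dict) -> None:
    for choice in sorted(body.get("choices", []), key=lambda c: c["index"]):
        print(f"[{choice['index']}] {choice['text'].strip()} ({choice.get('finish_reason')})")
    usage = body.get("usage")
    if usage:
        print("total tokens:", usage.get("total_tokens"))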

⚐ Response (default)

{
  $headers: {
    x-ms-error-code: string,
  },
  $schema: {
    error: {
      code: string,
      message: string,
      target: string,
      details: [
        string,
      ],
      innererror: {
        code: string,
        innererror: string,
      },
    },
  },
}